Advanced Lane Finding Project

The goals / steps of this project are the following:

  • Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
  • Apply a distortion correction to raw images.
  • Use color transforms, gradients, etc., to create a thresholded binary image.
  • Apply a perspective transform to rectify binary image ("birds-eye view").
  • Detect lane pixels and fit to find the lane boundary.
  • Determine the curvature of the lane and vehicle position with respect to center.
  • Warp the detected lane boundaries back onto the original image.
  • Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.

First, compute the camera calibration using chessboard images

In [1]:
#import necessary libs
import os
import pickle
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
%matplotlib inline

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)

# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
In [2]:
# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    

    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)

        img2 = img.copy()
        # Draw and display the corners
        img2 = cv2.drawChessboardCorners(img2, (9,6), corners, ret)
        #cv2.imshow('img',img)
        
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
        ax1.imshow(img)
        ax1.set_title('Original Image', fontsize=15)
        ax2.imshow(img2)
        ax2.set_title('With Corners ', fontsize=15)

        plt.show()

#cv2.destroyAllWindows()

Then do one test with undistort image operation, and plot the sample image

In [3]:
# Test undistortion on an image
images = glob.glob('./camera_cal/calibration*.jpg')

# Step through the list and search for chessboard corners
for fname in images:    
    print(fname)
    img = cv2.imread(fname)
    img_size = (img.shape[1], img.shape[0])

    # Do camera calibration given object points and image points
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)


    dst = cv2.undistort(img, mtx, dist, None, mtx)
    cv2.imwrite("calibration_wide/"+fname+"_undist.jpg",dst)

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=15)
    ax2.imshow(dst)
    ax2.set_title('Undistorted Image', fontsize=15)
./camera_cal/calibration1.jpg
./camera_cal/calibration10.jpg
./camera_cal/calibration11.jpg
./camera_cal/calibration12.jpg
./camera_cal/calibration13.jpg
./camera_cal/calibration14.jpg
./camera_cal/calibration15.jpg
./camera_cal/calibration16.jpg
./camera_cal/calibration17.jpg
./camera_cal/calibration18.jpg
./camera_cal/calibration19.jpg
./camera_cal/calibration2.jpg
./camera_cal/calibration20.jpg
./camera_cal/calibration3.jpg
./camera_cal/calibration4.jpg
./camera_cal/calibration5.jpg
./camera_cal/calibration6.jpg
./camera_cal/calibration7.jpg
./camera_cal/calibration8.jpg
./camera_cal/calibration9.jpg

save camera metrix and distortion coeffieiencies

In [4]:
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
pickle.dump( dist_pickle, open( "wide_dist_pickle.p", "wb" ) )

print("saved mtx and dist to pickle")
saved mtx and dist to pickle

Like in the P1, we need to specify one area of our interest, then we only process this area of the whole image

In [5]:
def region_of_interest(img, vertices):
    """
    Applies an image mask.
    
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
    #defining a blank mask to start with
    mask = np.zeros_like(img)   
    
    #defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
        
    #filling pixels inside the polygon defined by "vertices" with the fill color    
    cv2.fillPoly(mask, vertices, ignore_mask_color)
    
    #returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image

print("Function for region of interest is loaded")
Function for region of interest is loaded

More Functions for image processing

In [6]:
# Calculate directional gradient
def abs_sobel_thresh(gray, orient='x', sobel_kernel=3, thresh=(0, 255)):
    # Apply x or y gradient
    if orient == 'x':
    	sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    elif orient == 'y':
    	sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Take the absolute values
    sobel = np.absolute(sobel)
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*sobel/np.max(sobel))
    # Create a copy and apply the threshold
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel > thresh[0]) & (scaled_sobel < thresh[1])] = 1
    # Return the result
    return binary_output

# Calculate gradient magnitude
def mag_thresh(gray, sobel_kernel=3, mag_thresh=(0, 255)):
    # Apply x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Calculate the gradient magnitude
    sobel = np.sqrt(sobelx ** 2 + sobely ** 2)
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255*sobel/np.max(sobel))
    # Create a copy and apply the threshold
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel > mag_thresh[0]) & (scaled_sobel < mag_thresh[1])] = 1
    # Return the result
    return binary_output

# Calculate gradient direction
def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Apply x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    
    # Error statement to ignore division and invalid errors
    with np.errstate(divide='ignore', invalid='ignore'):
        absgraddir = np.absolute(np.arctan(sobely/sobelx))
        dir_binary =  np.zeros_like(absgraddir)
        dir_binary[(absgraddir > thresh[0]) & (absgraddir < thresh[1])] = 1
    # Return the result
    return dir_binary

print("Functions for edge dections are created")
Functions for edge dections are created

Function to process the image as one pipeline

In [7]:
# Edit this function to create your own pipeline.
def pipeline(img):     
    # Gaussian Blur
    kernel_size = 5
    img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    # Convert to HLS color space and separate the S channel
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s = hls[:,:,2]
    # Grayscale image
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Define sobel kernel size
    ksize = 7
    # Apply each of the thresholding functions
    gradx = abs_sobel_thresh(gray, orient='x', sobel_kernel=ksize, thresh=(10, 255))
    grady = abs_sobel_thresh(gray, orient='y', sobel_kernel=ksize, thresh=(60, 255))
    mag_binary = mag_thresh(gray, sobel_kernel=ksize, mag_thresh=(40, 255))
    dir_binary = dir_threshold(gray, sobel_kernel=ksize, thresh=(.65, 1.05))
    # Combine all the thresholding information
    combined = np.zeros_like(dir_binary)
    combined[((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))] = 1
    # Threshold color channel
    s_binary = np.zeros_like(combined)
    s_binary[(s > 160) & (s < 255)] = 1
    # Stack each channel to view their individual contributions in green and blue respectively
    # This returns a stack of the two binary images, whose components you can see as different colors    
    color_binary = np.zeros_like(combined)
    color_binary[(s_binary > 0) | (combined > 0)] = 1
    # Defining vertices for marked area
    imshape = img.shape
    left_bottom = (100, imshape[0])
    right_bottom = (imshape[1]-20, imshape[0])
    apex1 = (610, 410)
    apex2 = (680, 410)
    inner_left_bottom = (310, imshape[0])
    inner_right_bottom = (1150, imshape[0])
    inner_apex1 = (700,480)
    inner_apex2 = (650,480)
    vertices = np.array([[left_bottom, apex1, apex2, \
                          right_bottom, inner_right_bottom, \
                          inner_apex1, inner_apex2, inner_left_bottom]], dtype=np.int32)
    # Masked area
    color_binary = region_of_interest(color_binary, vertices)
    return color_binary
    

Test output of the pipeline Function with test images

In [8]:
for i in range(1,7):
    fname = 'test_images/test{}.jpg'.format(i)
    image = cv2.imread(fname)
    result = pipeline(image)

    # Plot the result
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()

    ax1.imshow(image)
    ax1.set_title('Original Image', fontsize=40)

    ax2.imshow(result, cmap='gray')
    ax2.set_title('Pipeline Result', fontsize=40)
   
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

Function to undistort image with calibration parameters, and transform image perspective, then return a warped (bird-view) image

In [9]:
# Define image shape
image_shape = image.shape
print("image shape:",image_shape)

# Define the region
area_of_interest = [[150+430,460],[1150-440,460],[1150,720],[150,720]]

# Define a function that takes an image, number of x and y points, 
# camera matrix and distortion coefficients
def corners_unwarp(img, mtx, dist):
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Choose an offset from image corners to plot detected corners
    offset1 = 200 # offset for dst points x value
    offset2 = 0 # offset for dst points bottom y value
    offset3 = 0 # offset for dst points top y value
    # Grab the image shape
    img_size = (gray.shape[1], gray.shape[0])
    # For source points I'm grabbing the outer four detected corners
    src = np.float32(area_of_interest)
    # For destination points, I'm arbitrarily choosing some points to be
    # a nice fit for displaying our warped result 
    dst = np.float32([[offset1, offset3], 
                      [img_size[0]-offset1, offset3], 
                      [img_size[0]-offset1, img_size[1]-offset2], 
                      [offset1, img_size[1]-offset2]])
    # Given src and dst points, calculate the perspective transform matrix
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    # Warp the image using OpenCV warpPerspective()
    warped = cv2.warpPerspective(undist, M, img_size)
    # Return the resulting image and matrix
    return warped, M, Minv

print("corner_unwarp function is created")
image shape: (720, 1280, 3)
corner_unwarp function is created

Test the image warp Function to show the bird view images

In [10]:
for i in range(1,7):
    fname = 'test_images/test{}.jpg'.format(i)
    image = cv2.imread(fname)
    result, perspective_M, perspective_Minv = corners_unwarp(image, mtx, dist)

    # Plot the result
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()

    ax1.imshow(image)
    ax1.set_title('Original Image', fontsize=40)

    ax2.imshow(result, cmap='gray')
    ax2.set_title('Pipeline Result', fontsize=40)
   
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

Find lines and curvature radius, draw poly lines

In [11]:
# Define a class to receive the characteristics of each line detection
class Line():
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False  
        # x values of the last n fits of the line
        self.recent_xfitted = [] 
        #average x values of the fitted line over the last n iterations
        self.bestx = None     
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None  
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]  
        #radius of curvature of the line in some units
        self.radius_of_curvature = None 
        #distance in meters of vehicle center from the line
        self.line_base_pos = None 
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float') 
        #x values for detected line pixels
        self.allx = None  
        #y values for detected line pixels
        self.ally = None
        #x values in windows
        self.windows = np.ones((3,12))*-1
        
print("line class is defined")
line class is defined
In [12]:
def find_curvature(yvals, fitx):
    # Define y-value where we want radius of curvature
    # I'll choose the maximum y-value, corresponding to the bottom of the image
    y_eval = np.max(yvals)
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    xm_per_pix = 3.7/700 # meteres per pixel in x dimension
    fit_cr = np.polyfit(yvals*ym_per_pix, fitx*xm_per_pix, 2)
    curverad = ((1 + (2*fit_cr[0]*y_eval + fit_cr[1])**2)**1.5) \
                                 /np.absolute(2*fit_cr[0])
    return curverad

def find_position(pts):
    # Find the position of the car from the center
    # It will show if the car is 'x' meters from the left or right
    position = image_shape[1]/2
    left  = np.min(pts[(pts[:,1] < position) & (pts[:,0] > 700)][:,1])
    right = np.max(pts[(pts[:,1] > position) & (pts[:,0] > 700)][:,1])
    center = (left + right)/2
    # Define conversions in x and y from pixels space to meters
    xm_per_pix = 3.7/700 # meteres per pixel in x dimension    
    return (position - center)*xm_per_pix
In [13]:
def find_nearest(array,value):
    # Function to find the nearest point from array
    if len(array) > 0:
        idx = (np.abs(array-value)).argmin()
        return array[idx]

def find_peaks(image, y_window_top, y_window_bottom, x_left, x_right):
    # Find the historgram from the image inside the window
    #histogram = np.sum(image[y_window_top:y_window_bottom,:], axis=0)

        
    # Find the max from the histogram
    histogram = np.sum(img[img.shape[0]//2:,:], axis=0)
    #plt.plot(histogram)

    
    if len(histogram[int(x_left):int(x_right)])>0:
        return np.argmax(histogram[int(x_left):int(x_right)]) + x_left
    else:
        return (x_left + x_right) / 2        

def sanity_check(lane, curverad, fitx, fit):       
    # Sanity check for the lane
    if lane.detected: # If lane is detected
        # If sanity check passes
        if abs(curverad / lane.radius_of_curvature - 1) < .6:        
            lane.detected = True
            lane.current_fit = fit
            lane.allx = fitx
            lane.bestx = np.mean(fitx)            
            lane.radius_of_curvature = curverad
            lane.current_fit = fit
        # If sanity check fails use the previous values
        else:
            lane.detected = False
            fitx = lane.allx
    else:
        # If lane was not detected and no curvature is defined
        if lane.radius_of_curvature: 
            if abs(curverad / lane.radius_of_curvature - 1) < 1:            
                lane.detected = True
                lane.current_fit = fit
                lane.allx = fitx
                lane.bestx = np.mean(fitx)            
                lane.radius_of_curvature = curverad
                lane.current_fit = fit
            else:
                lane.detected = False
                fitx = lane.allx      
        # If curvature was defined
        else:
            lane.detected = True
            lane.current_fit = fit
            lane.allx = fitx
            lane.bestx = np.mean(fitx)
            lane.radius_of_curvature = curverad
    return fitx

# Sanity check for the direction
def sanity_check_direction(right, right_pre, right_pre2):
    # If the direction is ok then pass
    if abs((right-right_pre) / (right_pre-right_pre2) - 1) < .2:
        return right
    # If not then compute the value from the previous values
    else:
        return right_pre + (right_pre - right_pre2)
    
# find_lanes function will detect left and right lanes from the warped image.
# 'n' windows will be used to identify peaks of histograms    
def find_lanes(n, image, x_window, lanes, \
               left_lane_x, left_lane_y, right_lane_x, right_lane_y, window_ind):
    # 'n' windows will be used to identify peaks of histograms
    # Set index1. This is used for placeholder.
    index1 = np.zeros((n+1,2))
    index1[0] = [300, 1100]
    index1[1] = [300, 1100]
    # Set the first left and right values
    left, right = (300, 1100)
    # Set the center
    center = 700
    # Set the previous center
    center_pre = center
    # Set the direction
    direction = 0
    for i in range(n-1):
        # set the window range.
        y_window_top = 720-720/n*(i+1)
        y_window_bottom = 720-720/n*i
        # If left and right lanes are detected from the previous image
        if (left_lane.detected==False) and (right_lane.detected==False):
            # Find the historgram from the image inside the window
            left  = find_peaks(image, y_window_top, y_window_bottom, index1[i+1,0]-200, index1[i+1,0]+200)
            right = find_peaks(image, y_window_top, y_window_bottom, index1[i+1,1]-200, index1[i+1,1]+200)
            # Set the direction
            left  = sanity_check_direction(left, index1[i+1,0], index1[i,0])
            right = sanity_check_direction(right, index1[i+1,1], index1[i,1]) 
            # Set the center
            center_pre = center
            center = (left + right)/2
            direction = center - center_pre
        # If both lanes were detected in the previous image
        # Set them equal to the previous one
        else:
            left  = left_lane.windows[window_ind, i]
            right = right_lane.windows[window_ind, i]
        # Make sure the distance between left and right laens are wide enough
        if abs(left-right) > 600:
            # Append coordinates to the left lane arrays
            left_lane_array = lanes[(lanes[:,1]>=left-x_window) & (lanes[:,1]<left+x_window) &
                                 (lanes[:,0]<=y_window_bottom) & (lanes[:,0]>=y_window_top)]
            left_lane_x += left_lane_array[:,1].flatten().tolist()
            left_lane_y += left_lane_array[:,0].flatten().tolist()
            if not math.isnan(np.mean(left_lane_array[:,1])):
                left_lane.windows[window_ind, i] = np.mean(left_lane_array[:,1])
                index1[i+2,0] = np.mean(left_lane_array[:,1])
            else:
                index1[i+2,0] = index1[i+1,0] + direction
                left_lane.windows[window_ind, i] = index1[i+2,0]
            # Append coordinates to the right lane arrays            
            right_lane_array = lanes[(lanes[:,1]>=right-x_window) & (lanes[:,1]<right+x_window) &
                                  (lanes[:,0]<y_window_bottom) & (lanes[:,0]>=y_window_top)]
            right_lane_x += right_lane_array[:,1].flatten().tolist()
            right_lane_y += right_lane_array[:,0].flatten().tolist()
            if not math.isnan(np.mean(right_lane_array[:,1])):
                right_lane.windows[window_ind, i] = np.mean(right_lane_array[:,1])
                index1[i+2,1] = np.mean(right_lane_array[:,1])
            else:
                index1[i+2,1] = index1[i+1,1] + direction
                right_lane.windows[window_ind, i] = index1[i+2,1]
    return left_lane_x, left_lane_y, right_lane_x, right_lane_y

print("Accessory functions are created")
Accessory functions are created
In [14]:
import math
# Function to find the fitting lines from the warped image
def fit_lanes(image):
    # define y coordinate values for plotting
    yvals = np.linspace(0, 100, num=101)*7.2  # to cover same y-range as image
    # find the coordinates from the image
    lanes = np.argwhere(image)
    # Coordinates for left lane
    left_lane_x = []
    left_lane_y = []
    # Coordinates for right lane
    right_lane_x = []
    right_lane_y = []
    # Curving left or right - -1: left 1: right
    curve = 0
    # Set left and right as None
    left = None
    right = None
    # Find lanes from three repeated procedures with different window values
    left_lane_x, left_lane_y, right_lane_x, right_lane_y \
        = find_lanes(4, image, 25, lanes, \
                     left_lane_x, left_lane_y, right_lane_x, right_lane_y, 0)
    left_lane_x, left_lane_y, right_lane_x, right_lane_y \
        = find_lanes(6, image, 50, lanes, \
                     left_lane_x, left_lane_y, right_lane_x, right_lane_y, 1)
    left_lane_x, left_lane_y, right_lane_x, right_lane_y \
        = find_lanes(8, image, 75, lanes, \
                     left_lane_x, left_lane_y, right_lane_x, right_lane_y, 2)
    # Find the coefficients of polynomials
    left_fit = np.polyfit(left_lane_y, left_lane_x, 2)
    left_fitx = left_fit[0]*yvals**2 + left_fit[1]*yvals + left_fit[2]
    right_fit = np.polyfit(right_lane_y, right_lane_x, 2)
    right_fitx = right_fit[0]*yvals**2 + right_fit[1]*yvals + right_fit[2]
    # Find curvatures
    left_curverad  = find_curvature(yvals, left_fitx)
    right_curverad = find_curvature(yvals, right_fitx)
    # Sanity check for the lanes
    left_fitx  = sanity_check(left_lane, left_curverad, left_fitx, left_fit)
    right_fitx = sanity_check(right_lane, right_curverad, right_fitx, right_fit)
    
    return yvals, left_fitx, right_fitx, left_lane_x, left_lane_y, right_lane_x, right_lane_y, left_curverad

print("fit_lanes are done")
fit_lanes are done
In [15]:
# draw poly on an image
# def draw_poly(image, warped, yvals, left_fitx, right_fitx, Minv):
def draw_poly(image, warped, yvals, left_fitx, right_fitx, 
              left_lane_x, left_lane_y, right_lane_x, right_lane_y, Minv, curvature):
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, yvals]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, yvals])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0])) 
    # Combine the result with the original image
    result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)
    # Put text on an image
    font = cv2.FONT_HERSHEY_SIMPLEX
    text = "Radius of Curvature: {} m".format(int(curvature))
    cv2.putText(result,text,(400,100), font, 1,(255,255,255),2)
    # Find the position of the car
    pts = np.argwhere(newwarp[:,:,1])
    position = find_position(pts)
    if position < 0:
        text = "Vehicle is {:.2f} m left of center".format(-position)
    else:
        text = "Vehicle is {:.2f} m right of center".format(position)
    cv2.putText(result,text,(400,150), font, 1,(255,255,255),2)
    
    
    return result

print("Draw poly on a original image")
Draw poly on a original image

This is the master Function which will be used when processing the image frames

In [16]:
# This function will color the image
# Input: Original image
# Output: Original image with colored region
def process_image(image):
    # Apply pipeline to the image to create black and white image
    img = pipeline(image)
    
    
    # Warp the image to make lanes parallel to each other
    top_down, perspective_M, perspective_Minv = corners_unwarp(img, mtx, dist)
    # Find the lines fitting to left and right lanes
    a, b, c, lx, ly, rx, ry, curvature = fit_lanes(top_down)
    # Return the original image with colored region
    return draw_poly(image, top_down, a, b, c, lx, ly, rx, ry, perspective_Minv, curvature)

Test pipeline and warp Functions, and plot test images and inter-mediate processed images

In [17]:
# These are to be used to plot lines on images
x_values = [area_of_interest[0][0],area_of_interest[1][0],area_of_interest[2][0],area_of_interest[3][0],area_of_interest[0][0]]
y_values = [area_of_interest[0][1],area_of_interest[1][1],area_of_interest[2][1],area_of_interest[3][1],area_of_interest[0][1]]

# Plot 6 example images and warp them
for i in range(1,7):
    # Set up lines for left and right
    left_lane = Line()
    right_lane = Line()
    # load the image
    fname = 'test_images/test{}.jpg'.format(i)
    img_raw = cv2.imread(fname)
    # Apply pipeline to the image to create black and white image
    img = pipeline(img_raw)
    # Unwrap the image
    top_down, perspective_M, perspective_Minv = corners_unwarp(img, mtx, dist)
    # Set up for the subplots
    f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(24, 9))
    f.tight_layout()
    # Showing the image from pipeline with marked areas
    ax1.set_title('Gray Image with area of interest', fontsize=20)
    ax1.plot(x_values,y_values,'r-',lw=2)
    ax1.imshow(img, cmap='gray')
    # Find lanes from the warped image
    a, b, c, _, _, _, _, _ = fit_lanes(top_down)
    ax2.plot(b, a, color='green', linewidth=5)
    ax2.plot(c, a, color='blue', linewidth=5)
    ax2.imshow(top_down, cmap='gray')
    ax2.set_title('Undistorted and Warped Image', fontsize=20)
    # Set up lines for left and right
    left_lane = Line()
    right_lane = Line()    
    # Use the information from fit_lane function to color the lanes
    image_color = process_image(img_raw)
    ax3.imshow(image_color)
    ax3.set_title('Image with a color', fontsize=20)
    # Plot three histograms
    top_down[top_down > 0] = 1
    histogram = np.sum(top_down[:240,:], axis=0)
    ax4.plot(histogram)
    histogram = np.sum(top_down[240:480,:], axis=0)
    ax4.plot(histogram)
    histogram = np.sum(top_down[480:,:], axis=0)
    ax4.plot(histogram)
    #indexes = find_peaks_cwt(histogram, np.arange(1, 550))
    ax4.set_title("histogram")
    # Adjusting subplots
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
/Users/huapinggu/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/ipykernel_launcher.py:62: RuntimeWarning: divide by zero encountered in double_scalars
/Users/huapinggu/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/numpy/core/fromnumeric.py:2889: RuntimeWarning: Mean of empty slice.
  out=out, **kwargs)
/Users/huapinggu/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/numpy/core/_methods.py:80: RuntimeWarning: invalid value encountered in double_scalars
  ret = ret.dtype.type(ret / rcount)

Load video and process each frames, then save to a file

In [18]:
### Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# Set up lines for left and right
left_lane = Line()
right_lane = Line()
white_output = 'project_video_output.mp4'
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
[MoviePy] >>>> Building video project_video_output.mp4
[MoviePy] Writing video project_video_output.mp4
100%|█████████▉| 1260/1261 [05:47<00:00,  3.64it/s]
[MoviePy] Done.
[MoviePy] >>>> Video ready: project_video_output.mp4 

CPU times: user 5min 38s, sys: 1min 26s, total: 7min 4s
Wall time: 5min 48s
In [19]:
HTML("""
<video width="960" height="540" controls>
  <source src="{0}">
</video>
""".format(white_output))
Out[19]:

Make a short gif

In [24]:
clip = (VideoFileClip(white_output)
        .subclip((0,0.1))
        .resize(0.5))
white_clip.write_gif("project_video_output.gif")
[MoviePy] Building file project_video_output.gif with imageio

  0%|          | 0/1261 [00:00<?, ?it/s]

  0%|          | 1/1261 [00:00<12:17,  1.71it/s]

  0%|          | 2/1261 [00:01<12:29,  1.68it/s]

  0%|          | 3/1261 [00:01<12:05,  1.73it/s]

  0%|          | 4/1261 [00:02<11:53,  1.76it/s]

  0%|          | 5/1261 [00:02<11:43,  1.78it/s]
  0%|          | 6/1261 [00:03<11:26,  1.83it/s]
  1%|          | 7/1261 [00:03<10:59,  1.90it/s]
  1%|          | 8/1261 [00:04<10:49,  1.93it/s]
  1%|          | 9/1261 [00:04<10:30,  1.99it/s]
  1%|          | 10/1261 [00:05<10:20,  2.02it/s]
  1%|          | 11/1261 [00:05<10:23,  2.01it/s]
  1%|          | 12/1261 [00:06<10:22,  2.01it/s]
  1%|          | 13/1261 [00:06<10:11,  2.04it/s]
  1%|          | 14/1261 [00:07<10:43,  1.94it/s]
  1%|          | 15/1261 [00:08<11:52,  1.75it/s]
  1%|▏         | 16/1261 [00:08<12:41,  1.64it/s]
  1%|▏         | 17/1261 [00:09<12:05,  1.72it/s]
  1%|▏         | 18/1261 [00:09<11:30,  1.80it/s]
  2%|▏         | 19/1261 [00:10<11:20,  1.82it/s]
  2%|▏         | 20/1261 [00:10<11:38,  1.78it/s]
  2%|▏         | 21/1261 [00:11<12:27,  1.66it/s]
  2%|▏         | 22/1261 [00:12<12:08,  1.70it/s]
  2%|▏         | 23/1261 [00:12<11:32,  1.79it/s]
  2%|▏         | 24/1261 [00:13<11:13,  1.84it/s]
  2%|▏         | 25/1261 [00:13<11:47,  1.75it/s]
  2%|▏         | 26/1261 [00:14<11:13,  1.83it/s]
  2%|▏         | 27/1261 [00:14<11:11,  1.84it/s]
  2%|▏         | 28/1261 [00:15<11:10,  1.84it/s]
  2%|▏         | 29/1261 [00:15<11:51,  1.73it/s]
  2%|▏         | 30/1261 [00:16<13:10,  1.56it/s]
  2%|▏         | 31/1261 [00:17<12:20,  1.66it/s]
  3%|▎         | 32/1261 [00:17<12:08,  1.69it/s]
  3%|▎         | 33/1261 [00:18<13:22,  1.53it/s]
  3%|▎         | 34/1261 [00:19<13:25,  1.52it/s]
  3%|▎         | 35/1261 [00:19<13:00,  1.57it/s]
  3%|▎         | 36/1261 [00:20<12:10,  1.68it/s]
  3%|▎         | 37/1261 [00:20<11:39,  1.75it/s]
  3%|▎         | 38/1261 [00:21<11:57,  1.71it/s]
  3%|▎         | 39/1261 [00:22<11:26,  1.78it/s]
  3%|▎         | 40/1261 [00:22<10:58,  1.85it/s]
  3%|▎         | 41/1261 [00:23<10:42,  1.90it/s]
  3%|▎         | 42/1261 [00:23<10:37,  1.91it/s]
  3%|▎         | 43/1261 [00:24<10:27,  1.94it/s]
  3%|▎         | 44/1261 [00:24<11:31,  1.76it/s]
  4%|▎         | 45/1261 [00:25<13:00,  1.56it/s]
  4%|▎         | 46/1261 [00:26<12:10,  1.66it/s]
  4%|▎         | 47/1261 [00:26<11:40,  1.73it/s]
  4%|▍         | 48/1261 [00:27<11:13,  1.80it/s]
  4%|▍         | 49/1261 [00:27<10:56,  1.85it/s]
  4%|▍         | 50/1261 [00:28<11:19,  1.78it/s]
  4%|▍         | 51/1261 [00:28<10:58,  1.84it/s]
  4%|▍         | 52/1261 [00:29<11:00,  1.83it/s]
  4%|▍         | 53/1261 [00:29<10:59,  1.83it/s]
  4%|▍         | 54/1261 [00:30<10:45,  1.87it/s]
  4%|▍         | 55/1261 [00:30<10:31,  1.91it/s]
  4%|▍         | 56/1261 [00:31<11:18,  1.78it/s]
  5%|▍         | 57/1261 [00:31<11:00,  1.82it/s]
  5%|▍         | 58/1261 [00:32<10:55,  1.83it/s]
  5%|▍         | 59/1261 [00:33<10:42,  1.87it/s]
  5%|▍         | 60/1261 [00:33<10:27,  1.91it/s]
  5%|▍         | 61/1261 [00:34<10:21,  1.93it/s]
  5%|▍         | 62/1261 [00:34<10:04,  1.98it/s]
  5%|▍         | 63/1261 [00:34<09:53,  2.02it/s]
  5%|▌         | 64/1261 [00:35<09:48,  2.03it/s]
  5%|▌         | 65/1261 [00:35<09:52,  2.02it/s]
  5%|▌         | 66/1261 [00:36<10:01,  1.99it/s]
  5%|▌         | 67/1261 [00:36<10:04,  1.97it/s]
  5%|▌         | 68/1261 [00:37<09:57,  2.00it/s]
  5%|▌         | 69/1261 [00:37<09:47,  2.03it/s]
  6%|▌         | 70/1261 [00:38<09:44,  2.04it/s]
  6%|▌         | 71/1261 [00:38<09:44,  2.04it/s]
  6%|▌         | 72/1261 [00:39<09:46,  2.03it/s]
  6%|▌         | 73/1261 [00:39<09:44,  2.03it/s]
  6%|▌         | 74/1261 [00:40<10:17,  1.92it/s]
  6%|▌         | 75/1261 [00:40<10:03,  1.97it/s]
  6%|▌         | 76/1261 [00:41<09:59,  1.98it/s]
  6%|▌         | 77/1261 [00:42<10:17,  1.92it/s]
  6%|▌         | 78/1261 [00:42<10:21,  1.90it/s]
  6%|▋         | 79/1261 [00:43<10:29,  1.88it/s]
  6%|▋         | 80/1261 [00:43<10:20,  1.90it/s]
  6%|▋         | 81/1261 [00:44<10:13,  1.92it/s]
  7%|▋         | 82/1261 [00:44<10:22,  1.89it/s]
  7%|▋         | 83/1261 [00:45<10:16,  1.91it/s]
  7%|▋         | 84/1261 [00:45<10:15,  1.91it/s]
  7%|▋         | 85/1261 [00:46<10:12,  1.92it/s]
  7%|▋         | 86/1261 [00:46<10:07,  1.93it/s]
  7%|▋         | 87/1261 [00:47<10:09,  1.93it/s]
  7%|▋         | 88/1261 [00:47<10:11,  1.92it/s]
  7%|▋         | 89/1261 [00:48<09:59,  1.95it/s]
  7%|▋         | 90/1261 [00:48<10:03,  1.94it/s]
  7%|▋         | 91/1261 [00:49<09:58,  1.95it/s]
  7%|▋         | 92/1261 [00:49<10:13,  1.90it/s]
  7%|▋         | 93/1261 [00:50<10:04,  1.93it/s]
  7%|▋         | 94/1261 [00:51<10:55,  1.78it/s]
  8%|▊         | 95/1261 [00:51<11:09,  1.74it/s]
  8%|▊         | 96/1261 [00:52<12:25,  1.56it/s]
  8%|▊         | 97/1261 [00:53<12:23,  1.57it/s]
  8%|▊         | 98/1261 [00:53<11:38,  1.66it/s]
  8%|▊         | 99/1261 [00:54<11:38,  1.66it/s]
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-24-6a8256769381> in <module>()
      2         .subclip((0,0.1))
      3         .resize(0.5))
----> 4 white_clip.write_gif("project_video_output.gif")

<decorator-gen-180> in write_gif(self, filename, fps, program, opt, fuzz, verbose, loop, dispose, colors, tempfiles)

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/decorators.py in requires_duration(f, clip, *a, **k)
     52         raise ValueError("Attribute 'duration' not set")
     53     else:
---> 54         return f(clip, *a, **k)
     55 
     56 

<decorator-gen-179> in write_gif(self, filename, fps, program, opt, fuzz, verbose, loop, dispose, colors, tempfiles)

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/decorators.py in convert_masks_to_RGB(f, clip, *a, **k)
     20     if clip.ismask:
     21         clip = clip.to_RGB()
---> 22     return f(clip, *a, **k)
     23 
     24 @decorator.decorator

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/video/VideoClip.py in write_gif(self, filename, fps, program, opt, fuzz, verbose, loop, dispose, colors, tempfiles)
    473         if program == 'imageio':
    474             write_gif_with_image_io(self, filename, fps=fps, opt=opt, loop=loop,
--> 475                                     verbose=verbose, colors=colors)
    476         elif tempfiles:
    477             #convert imageio opt variable to something that can be used with

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/video/io/gif_writers.py in write_gif_with_image_io(clip, filename, fps, opt, loop, colors, verbose)
    284     verbose_print(verbose, "\n[MoviePy] Building file %s with imageio\n"%filename)
    285 
--> 286     for frame in clip.iter_frames(fps=fps, progress_bar=True, dtype='uint8'):
    287 
    288         writer.append_data(frame)

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/tqdm/_tqdm.py in __iter__(self)
    831 """, fp_write=getattr(self.fp, 'write', sys.stderr.write))
    832 
--> 833             for obj in iterable:
    834                 yield obj
    835                 # Update and print the progressbar.

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/Clip.py in generator()
    473         def generator():
    474             for t in np.arange(0, self.duration, 1.0/fps):
--> 475                 frame = self.get_frame(t)
    476                 if (dtype is not None) and (frame.dtype != dtype):
    477                     frame = frame.astype(dtype)

<decorator-gen-138> in get_frame(self, t)

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/decorators.py in wrapper(f, *a, **kw)
     87         new_kw = {k: fun(v) if k in varnames else v
     88                  for (k,v) in kw.items()}
---> 89         return f(*new_a, **new_kw)
     90     return decorator.decorator(wrapper)
     91 

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/Clip.py in get_frame(self, t)
     93                 return frame
     94         else:
---> 95             return self.make_frame(t)
     96 
     97     def fl(self, fun, apply_to=[], keep_duration=True):

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/Clip.py in <lambda>(t)
    134 
    135         #mf = copy(self.make_frame)
--> 136         newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
    137 
    138         if not keep_duration:

~/miniconda3/envs/carnd-term1/lib/python3.5/site-packages/moviepy/video/VideoClip.py in <lambda>(gf, t)
    531         `get_frame(t)` by another frame,  `image_func(get_frame(t))`
    532         """
--> 533         return self.fl(lambda gf, t: image_func(gf(t)), apply_to)
    534 
    535     # --------------------------------------------------------------

<ipython-input-16-2cc16f5a033b> in process_image(image)
      4 def process_image(image):
      5     # Apply pipeline to the image to create black and white image
----> 6     img = pipeline(image)
      7 
      8 

<ipython-input-7-511c274a9fa6> in pipeline(img)
     12     ksize = 7
     13     # Apply each of the thresholding functions
---> 14     gradx = abs_sobel_thresh(gray, orient='x', sobel_kernel=ksize, thresh=(10, 255))
     15     grady = abs_sobel_thresh(gray, orient='y', sobel_kernel=ksize, thresh=(60, 255))
     16     mag_binary = mag_thresh(gray, sobel_kernel=ksize, mag_thresh=(40, 255))

KeyboardInterrupt: 
In [ ]: